Analyzing a very large file in Python with threads and seek()

2018/09/16 08:15
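
The script below answers a simple ops question: which regions do the client IPs in a huge access log come from, and in what proportion? It loads an IP database (ipdb_cn.txt, whitespace-separated lines whose first field is an IP and whose third field is a region name) into a region-to-/24-prefix map in a background thread, measures the size of flowdata.log with seek()/tell(), splits the file into 8 byte ranges that are scanned concurrently by threads for addresses tagged `_ip:`, and finally prints the share of observed prefixes per region.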


#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2018/9/16 2:00 AM
# @Author  : BrownWang
# @Email   : 277215243@qq.com
# @File    : Analysis.py
# @Software: PyCharm
import re
import heapq
import threading


dic = {}    # region name -> set of "a.b.c" /24 prefixes from the IP database
fdic = {}   # region name -> count of observed prefixes that belong to it


def readconfig():
    """Build the region-to-prefix map from ipdb_cn.txt (fields: ip ... region)."""
    with open('./ipdb_cn.txt', mode='r', encoding='utf-8') as f:
        for line in f:
            fields = line.split()
            region = fields[2]
            prefix = '.'.join(fields[0].split('.')[0:3])
            if region not in dic:
                dic[region] = set()
                fdic[region] = 0
            dic[region].add(prefix)


# Load the IP database in the background while the log is being scanned.
config_thread = threading.Thread(target=readconfig)
config_thread.start()

# Measure the log size by seeking to the end of the file.
with open('./flowdata.log', 'rb') as tf:
    tf.seek(0, 2)
    total = tf.tell()

# Unique "a.b.c" prefixes seen in the log; the built-in set.add is
# atomic under CPython's GIL, so the worker threads can share it.
filset = set()
ip_re = re.compile(rb'_ip:\s*(\d+\.\d+\.\d+)\.\d+')


def run(start, end):
    """Scan the log lines whose first byte falls inside [start, end)."""
    with open('./flowdata.log', 'rb') as f:
        if start:
            f.seek(start - 1)
            f.readline()              # skip the line that straddles the chunk boundary
        for line in f:
            if b'_ip' in line:
                m = ip_re.search(line)
                if m:
                    filset.add(m.group(1).decode())
            if f.tell() >= end:       # everything past here belongs to the next chunk
                break


# Split the file into 8 byte ranges and scan them with one thread each.
threads = []
for i in range(8):
    t = threading.Thread(target=run, args=(total * i // 8, total * (i + 1) // 8))
    threads.append(t)

for t in threads:
    t.start()
for t in threads:
    t.join()

sumfil = len(filset)
config_thread.join()                  # dic/fdic must be fully built before counting

# Count how many of the observed prefixes fall into each region.
for k in dic:
    for prefix in filset:
        if prefix in dic[k]:
            fdic[k] += 1

ret = [{'n': k, 'v': fdic[k] / sumfil * 100} for k in fdic]
sortl = heapq.nlargest(len(ret), ret, key=lambda s: s['v'])
for item in sortl:
    print(item['n'] + '   ' + str(round(item['v'], 2)) + '%')
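
Because CPython's GIL means the eight threads above largely take turns on the CPU-bound regex work, the same chunked seek()/readline scan can also be farmed out to worker processes. The script imports multiprocessing.Pool but never uses it; the following is a minimal sketch of that variant (the file name flowdata.log, the `_ip:` pattern and the 8-way split come from the script above, the rest is illustrative). Each worker returns its own prefix set and the parent merges them, so no shared state or locking is needed.

import os
import re
from multiprocessing import Pool

LOG = './flowdata.log'
IP_RE = re.compile(rb'_ip:\s*(\d+\.\d+\.\d+)\.\d+')


def scan_chunk(chunk):
    """Return the set of "a.b.c" prefixes found in bytes [start, end) of LOG."""
    start, end = chunk
    prefixes = set()
    with open(LOG, 'rb') as f:
        if start:
            f.seek(start - 1)
            f.readline()              # align to the next full line
        for line in f:
            if b'_ip' in line:
                m = IP_RE.search(line)
                if m:
                    prefixes.add(m.group(1).decode())
            if f.tell() >= end:
                break
    return prefixes


if __name__ == '__main__':
    total = os.path.getsize(LOG)      # same result as seek(0, 2) + tell()
    workers = 8
    chunks = [(total * i // workers, total * (i + 1) // workers)
              for i in range(workers)]
    with Pool(workers) as pool:
        filset = set().union(*pool.map(scan_chunk, chunks))
    print(len(filset), 'unique /24 prefixes')

The per-worker sets keep the scan lock-free; the trade-off is that each process re-opens the log and the result sets are pickled back to the parent, which is usually cheap compared with scanning the file itself.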